
%   Cmpts.m
%       A.P. Paplinski   25 May 1999
%   A simple competitive (winner-take-all) learning demonstration

% Simple competitive (winner-take-all) learning demo.
% Assumes the workspace already contains (set up e.g. by a companion
% script such as Cmpti):
%   X     -- p x N matrix of input patterns (columns)
%   winit -- m x p matrix of initial weight vectors (rows)
%   N     -- number of input patterns / learning steps
%   m     -- number of competing neurons
%   p     -- input dimensionality (plotting below assumes p = 2)
%   clst  -- 2 x (#clusters) matrix of true cluster centres (for plotting)
figure(1), clf reset
% The outer loop over subplots is disabled; its matching 'end' near the
% bottom is commented out as well so the script parses.
% for c = 1:4
% Cmpti ;
W = winit ;
V = zeros(N, m, p);  % to store all weights (trajectory, one slice per step)
V(1,:,:) = W ;       % NOTE(review): overwritten at k = 1 in the loop below -- confirm intent
wnm = ones(m,1) ;    % index vector used to replicate the input over m rows

eta = 0.08 ;    % learning gain
deta = 1-1/N ;  % learning gain decaying factor (unused while decay line is commented out)

for  k = 1:N
  xn = X(:,k)' ;   % current input pattern as a 1 x p row vector
% the current vector is compared with all weight vectors:
% xn(wnm,:) replicates xn into an m x p matrix, one copy per neuron
  xmw = xn(wnm,:)-W ;
% winner = neuron whose weight vector is closest (smallest squared distance)
  [win jwin] = min(sum((xmw.^2),2)) ;
% the weights of the winning neuron are updated (moved towards the input)
  W(jwin,:) = W(jwin,:) + eta*xmw(jwin,:) ;
  V(k,:,:) = W ;   % record the weights after this step
% eta = eta*deta ;
end 

% subplot(2,2,c)
% green dots: inputs; red circles: true cluster centres; blue crosses:
% initial weights; blue lines: weight trajectories; red stars: final weights
plot(X(1,:),X(2,:),'g.',clst(1,:),clst(2,:),'ro', ...
     winit(:,1),winit(:,2),'bx' , ...
     V(:,:,1), V(:,:,2), 'b', W(:,1),W(:,2) , 'r*'), grid
%  title('Simple Competitive Learning')
% end    % (matches the commented-out 'for c = 1:4' above; left commented so the script parses)

% print -f1 -deps2 Cmpts

